In [1]:
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
%matplotlib inline
In [2]:
# Load the Auto data set; '?' marks missing values in the raw CSV.
auto_df = pd.read_csv('data/Auto.csv', na_values="?")
# Reassign instead of inplace=True: avoids hidden in-place mutation and
# keeps the cell idempotent on re-run.
auto_df = auto_df.dropna()
auto_df.head()
Out[2]:
In [11]:
# Scatter plot of mpg vs. horsepower — the clear curvature motivates the
# polynomial models evaluated below.
fig, ax = plt.subplots()
ax.scatter(x=auto_df['horsepower'], y=auto_df['mpg'])
ax.set_xlabel('horsepower')  # was missing: a figure should label both axes
ax.set_ylabel('mpg')
Out[11]:
In [3]:
from sklearn.linear_model import LinearRegression
# sklearn.cross_validation was removed in scikit-learn 0.20; the CV
# splitters now live in sklearn.model_selection.
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import mean_squared_error
In [4]:
# LOOCV by explicitly refitting the model once per observation
# (slow but direct; the shortcut version below avoids the refits).
from sklearn.model_selection import LeaveOneOut  # cell-local so this cell runs on modern sklearn

clf = LinearRegression()
X = auto_df[['horsepower']].values
y = auto_df['mpg'].values

# Modern API: LeaveOneOut() takes no size argument; .split(X) yields the
# (train, test) index pairs the old LeaveOneOut(n) used to iterate over.
loo = LeaveOneOut()
mses = []
for train, test in loo.split(X):
    Xtrain, ytrain, Xtest, ytest = X[train], y[train], X[test], y[test]
    clf.fit(Xtrain, ytrain)
    ypred = clf.predict(Xtest)
    mses.append(mean_squared_error(ytest, ypred))
np.mean(mses)
Out[4]:
In [13]:
def loo_shortcut(X, y):
    """Closed-form LOOCV mean squared error for an OLS model with intercept.

    Uses the leverage identity: for least squares, the leave-one-out
    residual equals (y_i - yhat_i) / (1 - h_ii), where h_ii is the i-th
    diagonal entry of the hat matrix — so no refitting is needed.

    The original version used the simple-regression leverage formula
    (1/n + (x_i - xbar)^2 / Sxx), which is only correct for a single
    predictor; this version computes the exact hat diagonal and therefore
    also handles the multi-column polynomial designs built below.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    y : array-like, shape (n_samples,)

    Returns
    -------
    float
        Mean of the squared leave-one-out residuals.
    """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    # Design matrix with an explicit intercept column (matches
    # LinearRegression's default fit_intercept=True).
    Xd = np.column_stack([np.ones(len(X)), X])
    beta, *_ = np.linalg.lstsq(Xd, y, rcond=None)
    ypred = Xd @ beta
    # Hat-matrix diagonal h_ii = x_i' (X'X)^+ x_i; pinv also copes with a
    # rank-deficient design (e.g. a duplicated constant "unit" column).
    h = np.einsum('ij,ij->i', Xd, Xd @ np.linalg.pinv(Xd.T @ Xd))
    return np.mean(((y - ypred) / (1.0 - h)) ** 2)
loo_shortcut(auto_df[['horsepower']].values,auto_df['mpg'].values)
Out[13]:
In [16]:
# LOOCV applied to model selection: same model family, increasing
# polynomial degree in horsepower.
for degree in range(2, 6):
    auto_df['horsepower^%d' % degree] = auto_df['horsepower'] ** degree
auto_df['unit'] = 1
colnames = ["unit", "horsepower", "horsepower^2", "horsepower^3", "horsepower^4", "horsepower^5"]
cv_errors = []
# colnames[0:ncols] = intercept column + polynomial terms up to degree
# ncols-1. Range extended to len(colnames)+1 so the degree-5 column is
# actually evaluated (the original built it but stopped at degree 4), and
# the dead clf.fit() — whose result was never used — is removed.
for ncols in range(2, len(colnames) + 1):
    X = auto_df[colnames[0:ncols]]
    y = auto_df['mpg']
    cv_errors.append(loo_shortcut(X.values, y.values))
plt.plot(range(1, len(colnames)), cv_errors)
plt.xlabel('degree')
plt.ylabel('cv.error')
Out[16]:
In [17]:
from sklearn.cross_validation import KFold
In [19]:
# 10-fold cross-validation for the same degree-selection problem.
from sklearn.model_selection import KFold  # cell-local so this cell runs on modern sklearn

cv_errors = []
for ncols in range(2, 6):
    X = auto_df[colnames[0:ncols]].values
    y = auto_df['mpg'].values
    # Modern API: KFold takes n_splits, and .split(X) yields the indices
    # that the old KFold(n, n_folds=...) object used to iterate over.
    kfold = KFold(n_splits=10)
    mses = []
    for train, test in kfold.split(X):
        Xtrain, ytrain, Xtest, ytest = X[train], y[train], X[test], y[test]
        clf = LinearRegression()
        # BUG FIX: the original called clf.fit(X, y), training on the full
        # data set (including the test fold) and reusing a clf leaked from
        # an earlier cell — fit only on the training fold.
        clf.fit(Xtrain, ytrain)
        ypred = clf.predict(Xtest)
        mses.append(mean_squared_error(ytest, ypred))
    cv_errors.append(np.mean(mses))
plt.plot(range(1, 5), cv_errors)
plt.xlabel("degree")
plt.ylabel('cv.error')
Out[19]:
In [20]:
from sklearn.cross_validation import Bootstrap
In [24]:
# Bootstrap estimate of the test MSE for each polynomial degree.
# sklearn's Bootstrap splitter no longer exists, so resample directly:
# train on a with-replacement sample of 90% of the rows and evaluate on
# the held-out ("out-of-bag") rows.
rng = np.random.RandomState(0)  # fixed seed keeps the resampling reproducible
n = len(auto_df)
cv_errors = []
for ncols in range(2, 6):
    X = auto_df[colnames[0:ncols]].values
    y = auto_df['mpg'].values
    mses = []
    for _ in range(10):  # 10 bootstrap iterations, as in the original
        train = rng.randint(0, n, size=int(0.9 * n))  # sample with replacement
        test = np.setdiff1d(np.arange(n), train)      # rows never drawn
        clf = LinearRegression()
        # BUG FIX: the original called clf.fit(X, y), training on the full
        # data set (test rows included) — fit only on the bootstrap sample.
        clf.fit(X[train], y[train])
        ypred = clf.predict(X[test])
        mses.append(mean_squared_error(y[test], ypred))
    cv_errors.append(np.mean(mses))
plt.plot(range(1, 5), cv_errors)
plt.xlabel('degree')
plt.ylabel('cv.error')
Out[24]:
In [ ]: